Initial SMP support
author     djm@kirby.fc.hp.com <djm@kirby.fc.hp.com>
           Wed, 12 Oct 2005 23:12:59 +0000 (17:12 -0600)
committer  djm@kirby.fc.hp.com <djm@kirby.fc.hp.com>
           Wed, 12 Oct 2005 23:12:59 +0000 (17:12 -0600)
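
Allocate the per-CPU data area from the xenheap, split setup_arch() into
early_setup_arch() and late_setup_arch(), implement smp_send_event_check_mask()
with reschedule IPIs, boot application processors on idle-domain vcpus, derive
smp_processor_id() from thread_info->cpu, raise NR_CPUS from 2 to 8, move the
kernel-register save from start_kernel() into cpu_init() so it runs on every
CPU, and pin domain 0 on CPU 0.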
Signed-off-by: Tristan Gingold <Tristan.Gingold@bull.net>

17 files changed:
xen/arch/ia64/linux-xen/head.S
xen/arch/ia64/linux-xen/irq_ia64.c
xen/arch/ia64/linux-xen/mm_contig.c
xen/arch/ia64/linux-xen/setup.c
xen/arch/ia64/linux-xen/smp.c
xen/arch/ia64/linux-xen/smpboot.c
xen/arch/ia64/xen/acpi.c
xen/arch/ia64/xen/domain.c
xen/arch/ia64/xen/process.c
xen/arch/ia64/xen/xenirq.c
xen/arch/ia64/xen/xenmisc.c
xen/arch/ia64/xen/xensetup.c
xen/arch/ia64/xen/xentime.c
xen/include/asm-ia64/config.h
xen/include/asm-ia64/linux-xen/asm/spinlock.h
xen/include/asm-ia64/linux-xen/linux/hardirq.h
xen/include/asm-ia64/linux-xen/linux/interrupt.h

diff --git a/xen/arch/ia64/linux-xen/head.S b/xen/arch/ia64/linux-xen/head.S
index 9da143d979a2de10b311ecdc105d65fae34aa020..14e919ddf0e0cc26e0d27f2c0c4f734c7548ac43 100644
@@ -324,6 +324,9 @@ start_ap:
        mov r16=-1
 (isBP) br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it
 
+#ifndef XEN
+       // XEN: stack is allocated in xenheap, which is currently always
+       //  mapped.
        // load mapping for stack (virtaddr in r2, physaddr in r3)
        rsm psr.ic
        movl r17=PAGE_KERNEL
@@ -353,7 +356,8 @@ start_ap:
        ssm psr.ic
        srlz.d
        ;;
-
+#endif
+       
 .load_current:
        // load the "current" pointer (r13) and ar.k6 with the current task
 #if defined(XEN) && defined(VALIDATE_VT)
diff --git a/xen/arch/ia64/linux-xen/irq_ia64.c b/xen/arch/ia64/linux-xen/irq_ia64.c
index 5436ef8694d37d0dfe4e7d82a9e1990a10b5442a..c036ed08ee28b44db4bdec0177be33c396bac67c 100644
@@ -281,5 +281,8 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
        ipi_data = (delivery_mode << 8) | (vector & 0xff);
        ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
 
+#ifdef XEN
+       printf ("send_ipi to %d (%x)\n", cpu, phys_cpu_id);
+#endif
        writeq(ipi_data, ipi_addr);
 }
diff --git a/xen/arch/ia64/linux-xen/mm_contig.c b/xen/arch/ia64/linux-xen/mm_contig.c
index 6b9ee53aca4c02c0ec990b1770ff72c776b283b5..896a871c3b277a1102c0f586d8594ca265c34fe8 100644
@@ -193,8 +193,8 @@ per_cpu_init (void)
         */
        if (smp_processor_id() == 0) {
 #ifdef XEN
-               cpu_data = alloc_xenheap_pages(PERCPU_PAGE_SHIFT -
-                       PAGE_SHIFT + get_order(NR_CPUS));
+               cpu_data = alloc_xenheap_pages(get_order(NR_CPUS
+                                                        * PERCPU_PAGE_SIZE));
 #else
                cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
                                           PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
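
The old order computation added the per-CPU page order to get_order(NR_CPUS),
but get_order() takes a size in bytes, so the allocation did not scale with the
CPU count. A standalone check of both expressions (a sketch: the ia64 shift
values below are assumptions, and get_order() is re-implemented here with
Linux's semantics):

#include <stdio.h>

#define PAGE_SHIFT        14                 /* assumed: 16 KB Xen pages */
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define PERCPU_PAGE_SHIFT 16                 /* assumed: 64 KB per-CPU area */
#define PERCPU_PAGE_SIZE  (1UL << PERCPU_PAGE_SHIFT)
#define NR_CPUS           8

/* get_order(size): smallest order with PAGE_SIZE << order >= size */
static int get_order(unsigned long size)
{
	int order = 0;
	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	/* Old: get_order(NR_CPUS) treats the CPU count as a byte count,
	 * which is order 0, so the total stays at one per-CPU area. */
	int old_order = PERCPU_PAGE_SHIFT - PAGE_SHIFT + get_order(NR_CPUS);
	/* New: request NR_CPUS * PERCPU_PAGE_SIZE bytes outright. */
	int new_order = get_order(NR_CPUS * PERCPU_PAGE_SIZE);

	printf("old: order %d (%lu KB), new: order %d (%lu KB)\n",
	       old_order, (PAGE_SIZE << old_order) >> 10,
	       new_order, (PAGE_SIZE << new_order) >> 10);
	return 0;   /* old: order 2 (64 KB), new: order 5 (512 KB) */
}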
diff --git a/xen/arch/ia64/linux-xen/setup.c b/xen/arch/ia64/linux-xen/setup.c
index ba7f91098dcb67887a54981ddeaaa1eba9d86448..7f351dd05c983a2d2ddbb1e330bd15e905329c4b 100644
@@ -366,6 +366,7 @@ check_for_logical_procs (void)
 }
 #endif
 
+void __init
 #ifdef XEN
 early_setup_arch (char **cmdline_p)
 #else
@@ -377,14 +378,12 @@ setup_arch (char **cmdline_p)
        ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
 
        *cmdline_p = __va(ia64_boot_param->command_line);
-#ifdef XEN
-       efi_init();
-#else
+#ifndef XEN
        strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+#endif
 
        efi_init();
        io_port_init();
-#endif
 
 #ifdef CONFIG_IA64_GENERIC
        {
@@ -414,11 +413,17 @@ setup_arch (char **cmdline_p)
 #ifdef XEN
        early_cmdline_parse(cmdline_p);
        cmdline_parse(*cmdline_p);
-#undef CONFIG_ACPI_BOOT
 #endif
        if (early_console_setup(*cmdline_p) == 0)
                mark_bsp_online();
 
+#ifdef XEN
+}
+
+void __init
+late_setup_arch (char **cmdline_p)
+{
+#endif
 #ifdef CONFIG_ACPI_BOOT
        /* Initialize the ACPI boot-time table parser */
        acpi_table_init();
@@ -433,20 +438,16 @@ setup_arch (char **cmdline_p)
 
 #ifndef XEN
        find_memory();
-#else
-       io_port_init();
-}
-
-void __init
-late_setup_arch (char **cmdline_p)
-{
-#undef CONFIG_ACPI_BOOT
-       acpi_table_init();
 #endif
+
        /* process SAL system table: */
        ia64_sal_init(efi.sal_systab);
 
 #ifdef CONFIG_SMP
+#ifdef XEN
+       init_smp_config ();
+#endif
+
        cpu_physical_id(0) = hard_smp_processor_id();
 
        cpu_set(0, cpu_sibling_map[0]);
@@ -768,6 +769,11 @@ cpu_init (void)
 
        cpu_data = per_cpu_init();
 
+#ifdef XEN
+       printf ("cpu_init: current=%p, current->domain->arch.mm=%p\n",
+               current, current->domain->arch.mm);
+#endif
+
        /*
         * We set ar.k3 so that assembly code in MCA handler can compute
         * physical addresses of per cpu variables with a simple:
@@ -887,6 +893,16 @@ cpu_init (void)
 #ifndef XEN
        pm_idle = default_idle;
 #endif
+
+#ifdef XEN
+    /* surrender usage of kernel registers to domain, use percpu area instead */
+    __get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE);
+    __get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA);
+    __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK);
+    __get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER);
+    __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT);
+    __get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE);
+#endif
 }
 
 void
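
With this split, the boot path runs the two halves at different points (a
simplified sketch of the intended sequence, not the verbatim xensetup.c code;
boot_sequence() is a hypothetical wrapper):

extern void early_setup_arch(char **cmdline_p);
extern void late_setup_arch(char **cmdline_p);

/* Hypothetical wrapper showing where the two halves of the old
 * setup_arch() now land in the boot sequence. */
static void boot_sequence(char *cmdline)
{
	early_setup_arch(&cmdline);  /* vtop patching, efi_init(), io_port_init(),
	                              * command-line parsing, early console */
	/* ... heap, scheduler and interrupt initialization ... */
	late_setup_arch(&cmdline);   /* acpi_table_init(), ia64_sal_init(),
	                              * init_smp_config(), sibling maps */
}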
diff --git a/xen/arch/ia64/linux-xen/smp.c b/xen/arch/ia64/linux-xen/smp.c
index 28c294a24702cd73f84cd41346ba00d3027930e3..74f932137ef0e5453ba4e09f169f59e80c3c81b1 100644
@@ -63,9 +63,18 @@ void flush_tlb_mask(cpumask_t mask)
 //Huh? This seems to be used on ia64 even if !CONFIG_SMP
 void smp_send_event_check_mask(cpumask_t mask)
 {
-       printf("smp_send_event_check_mask called\n");
-       //dummy();
-       //send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
+    int cpu;
+
+    /*  Not for me.  */
+    cpu_clear(smp_processor_id(), mask);
+    if (cpus_empty(mask))
+        return;
+
+    printf("smp_send_event_check_mask called\n");
+
+    for (cpu = 0; cpu < NR_CPUS; ++cpu)
+        if (cpu_isset(cpu, mask))
+           platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
 
 
@@ -249,6 +258,7 @@ send_IPI_self (int op)
        send_IPI_single(smp_processor_id(), op);
 }
 
+#ifndef XEN
 /*
  * Called with preemption disabled.
  */
@@ -257,6 +267,7 @@ smp_send_reschedule (int cpu)
 {
        platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
+#endif
 
 void
 smp_flush_tlb_all (void)
@@ -395,15 +406,14 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
        if (wait)
                atomic_set(&data.finished, 0);
 
-       printk("smp_call_function: about to spin_lock \n");
        spin_lock(&call_lock);
-       printk("smp_call_function: done with spin_lock \n");
+#if 0 //def XEN
+       printk("smp_call_function: %d lock\n", smp_processor_id ());
+#endif
 
        call_data = &data;
        mb();   /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-       printk("smp_call_function: about to send_IPI \n");
        send_IPI_allbutself(IPI_CALL_FUNC);
-       printk("smp_call_function: done with send_IPI \n");
 
        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
@@ -414,9 +424,10 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
                        cpu_relax();
        call_data = NULL;
 
-       printk("smp_call_function: about to spin_unlock \n");
        spin_unlock(&call_lock);
+#if 0 //def XEN
        printk("smp_call_function: DONE WITH spin_unlock, returning \n");
+#endif
        return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
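
For reference, the receiving side of this handshake is the IPI_CALL_FUNC case
of handle_IPI() in the same file; schematically (a sketch with a hypothetical
function name, not the verbatim handler):

/* Runs on every CPU that receives IPI_CALL_FUNC.  Pairs with the
 * atomic counters smp_call_function() spins on above. */
static void do_call_func_ipi(void)
{
	struct call_data_struct *data = (struct call_data_struct *)call_data;
	void (*func)(void *) = data->func;
	void *info = data->info;
	int wait = data->wait;

	mb();                        /* read call_data's fields before signalling */
	atomic_inc(&data->started);  /* tell the sender this CPU has the data */
	(*func)(info);
	if (wait) {
		mb();                         /* make func's effects visible first */
		atomic_inc(&data->finished);  /* sender may now return */
	}
}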
diff --git a/xen/arch/ia64/linux-xen/smpboot.c b/xen/arch/ia64/linux-xen/smpboot.c
index 1c0e84e31d370a4822ab175d95e92c325c54500d..89f68296481e5678e878d964ce71e46e725ad0fa 100644
@@ -477,6 +477,22 @@ do_boot_cpu (int sapicid, int cpu)
 
 do_rest:
        task_for_booting_cpu = c_idle.idle;
+#else
+       struct domain *idle;
+       struct vcpu *v;
+       void *stack;
+
+       if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
+               panic("failed 'createdomain' for CPU %d", cpu);
+       set_bit(_DOMF_idle_domain, &idle->domain_flags);
+       v = idle->vcpu[0];
+
+       printf ("do_boot_cpu: cpu=%d, domain=%p, vcpu=%p\n", cpu, idle, v);
+
+       task_for_booting_cpu = v;
+
+       /* Set cpu number.  */
+       get_thread_info(v)->cpu = cpu;
 #endif
 
        Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
diff --git a/xen/arch/ia64/xen/acpi.c b/xen/arch/ia64/xen/acpi.c
index 6dbc687b8b15950ac1a63e2b4b8ded60e1656118..f0eab76870a01dd1ddfe02500aac143bb246eff9 100644
@@ -121,6 +121,7 @@ acpi_get_sysname (void)
 #ifdef CONFIG_ACPI_BOOT
 
 #define ACPI_MAX_PLATFORM_INTERRUPTS   256
+#define NR_IOSAPICS 4
 
 #if 0
 /* Array to record platform interrupt vectors for generic interrupt routing. */
@@ -162,7 +163,6 @@ static int                  available_cpus __initdata;
 struct acpi_table_madt *       acpi_madt __initdata;
 static u8                      has_8259;
 
-#if 0
 static int __init
 acpi_parse_lapic_addr_ovr (
        acpi_table_entry_header *header, const unsigned long end)
@@ -247,12 +247,13 @@ acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
 
        acpi_table_print_madt_entry(header);
 
+#if 0
        iosapic_init(iosapic->address, iosapic->global_irq_base);
+#endif
 
        return 0;
 }
 
-
 static int __init
 acpi_parse_plat_int_src (
        acpi_table_entry_header *header, const unsigned long end)
@@ -267,6 +268,7 @@ acpi_parse_plat_int_src (
 
        acpi_table_print_madt_entry(header);
 
+#if 0
        /*
         * Get vector assignment for this interrupt, set attributes,
         * and program the IOSAPIC routing table.
@@ -280,6 +282,7 @@ acpi_parse_plat_int_src (
                                                (plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
 
        platform_intr_list[plintsrc->type] = vector;
+#endif
        return 0;
 }
 
@@ -297,13 +300,14 @@ acpi_parse_int_src_ovr (
 
        acpi_table_print_madt_entry(header);
 
+#if 0
        iosapic_override_isa_irq(p->bus_irq, p->global_irq,
                                 (p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
                                 (p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+#endif
        return 0;
 }
 
-
 static int __init
 acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
 {
@@ -331,8 +335,10 @@ void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
                 */
                sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
 
+#if 0
                /*Start cyclone clock*/
                cyclone_setup(0);
+#endif
        }
 }
 
@@ -350,7 +356,9 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
 #else
        has_8259 = acpi_madt->flags.pcat_compat;
 #endif
+#if 0
        iosapic_system_init(has_8259);
+#endif
 
        /* Get base address of IPI Message Block */
 
@@ -364,7 +372,6 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
 
        return 0;
 }
-#endif
 
 #ifdef CONFIG_ACPI_NUMA
 
@@ -529,6 +536,7 @@ acpi_register_gsi (u32 gsi, int polarity, int trigger)
        return acpi_register_irq(gsi, polarity, trigger);
 }
 EXPORT_SYMBOL(acpi_register_gsi);
+#endif
 static int __init
 acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
 {
@@ -550,10 +558,11 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
        if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
                acpi_legacy_devices = 1;
 
+#if 0
        acpi_register_gsi(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
+#endif
        return 0;
 }
-#endif
 
 unsigned long __init
 acpi_find_rsdp (void)
@@ -567,7 +576,6 @@ acpi_find_rsdp (void)
        return rsdp_phys;
 }
 
-#if 0
 int __init
 acpi_boot_init (void)
 {
@@ -646,6 +654,7 @@ acpi_boot_init (void)
        printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
        return 0;
 }
+#if 0
 int
 acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
 {
diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
index 4cd54a0b8c4d323ef55fa18f6d2c07b4e6fd9897..7c9b0ddc11c7e5adc70ecaf6aee99fbe3c24bda7 100644
@@ -23,6 +23,7 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/desc.h>
+#include <asm/hw_irq.h>
 //#include <asm/mpspec.h>
 #include <xen/irq.h>
 #include <xen/event.h>
@@ -75,35 +76,21 @@ void free_perdomain_pt(struct domain *d)
        //free_page((unsigned long)d->mm.perdomain_pt);
 }
 
-int hlt_counter;
-
-void disable_hlt(void)
-{
-       hlt_counter++;
-}
-
-void enable_hlt(void)
-{
-       hlt_counter--;
-}
-
 static void default_idle(void)
 {
-       if ( hlt_counter == 0 )
-       {
+       int cpu = smp_processor_id();
        local_irq_disable();
-           if ( !softirq_pending(smp_processor_id()) )
+       if ( !softirq_pending(cpu))
                safe_halt();
-           //else
-               local_irq_enable();
-       }
+       local_irq_enable();
 }
 
-void continue_cpu_idle_loop(void)
+static void continue_cpu_idle_loop(void)
 {
        int cpu = smp_processor_id();
        for ( ; ; )
        {
+       printf ("idle%dD\n", cpu);
 #ifdef IA64
 //        __IRQ_STAT(cpu, idle_timestamp) = jiffies
 #else
@@ -111,23 +98,32 @@ void continue_cpu_idle_loop(void)
 #endif
            while ( !softirq_pending(cpu) )
                default_idle();
+           add_preempt_count(SOFTIRQ_OFFSET);
            raise_softirq(SCHEDULE_SOFTIRQ);
            do_softirq();
+           sub_preempt_count(SOFTIRQ_OFFSET);
        }
 }
 
 void startup_cpu_idle_loop(void)
 {
+       int cpu = smp_processor_id ();
        /* Just some sanity to ensure that the scheduler is set up okay. */
        ASSERT(current->domain == IDLE_DOMAIN_ID);
+       printf ("idle%dA\n", cpu);
        raise_softirq(SCHEDULE_SOFTIRQ);
+#if 0   /* All this work is done within continue_cpu_idle_loop  */
+       printf ("idle%dB\n", cpu);
+       asm volatile ("mov ar.k2=r0");
        do_softirq();
+       printf ("idle%dC\n", cpu);
 
        /*
         * Declares CPU setup done to the boot processor.
         * Therefore memory barrier to ensure state is visible.
         */
        smp_mb();
+#endif
 #if 0
 //do we have to ensure the idle task has a shared page so that, for example,
 //region registers can be loaded from it.  Apparently not...
@@ -229,17 +225,21 @@ void arch_do_createdomain(struct vcpu *v)
        v->arch.breakimm = d->arch.breakimm;
 
        d->arch.sys_pgnr = 0;
-       d->arch.mm = xmalloc(struct mm_struct);
-       if (unlikely(!d->arch.mm)) {
-               printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
-               return -ENOMEM;
-       }
-       memset(d->arch.mm, 0, sizeof(*d->arch.mm));
-       d->arch.mm->pgd = pgd_alloc(d->arch.mm);
-       if (unlikely(!d->arch.mm->pgd)) {
-               printk("Can't allocate pgd for domain %d\n",d->domain_id);
-               return -ENOMEM;
-       }
+       if (d->domain_id != IDLE_DOMAIN_ID) {
+               d->arch.mm = xmalloc(struct mm_struct);
+               if (unlikely(!d->arch.mm)) {
+                       printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
+                       return -ENOMEM;
+               }
+               memset(d->arch.mm, 0, sizeof(*d->arch.mm));
+               d->arch.mm->pgd = pgd_alloc(d->arch.mm);
+               if (unlikely(!d->arch.mm->pgd)) {
+                       printk("Can't allocate pgd for domain %d\n",d->domain_id);
+                       return -ENOMEM;
+               }
+       } else
+               d->arch.mm = NULL;
+       printf ("arch_do_create_domain: domain=%p\n", d);
 }
 
 void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
diff --git a/xen/arch/ia64/xen/process.c b/xen/arch/ia64/xen/process.c
index b49a94d8a8ec8a50b531d74f6f56decd3910a0c2..14320020325aab97cfef9fa50b10e8fa253614bf 100644
@@ -62,11 +62,23 @@ long do_iopl(domid_t domain, unsigned int new_io_pl)
        return 0;
 }
 
+#include <xen/sched-if.h>
+
+extern struct schedule_data schedule_data[NR_CPUS];
+
 void schedule_tail(struct vcpu *next)
 {
        unsigned long rr7;
        //printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
        //printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
+
+    // TG: Real HACK FIXME.
+    // This is currently necessary because when a new domain is started, 
+    // the context_switch function of xen/common/schedule.c(__enter_scheduler)
+    // never returns.  Therefore, the lock must be released.
+    // schedule_tail is only called when a domain is started.
+    spin_unlock_irq(&schedule_data[current->processor].schedule_lock);
+
        /* rr7 will be postponed to last point when resuming back to guest */
     if(VMX_DOMAIN(current)){
        vmx_load_all_rr(current);
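
The unlock hack above exists because of the shape of the generic scheduler,
roughly (a simplified sketch consistent with the comment in the hunk, not the
verbatim xen/common/schedule.c; pick_next_vcpu() is a stand-in for the real
policy code):

static void __enter_scheduler(void)
{
	struct vcpu *prev = current, *next;

	spin_lock_irq(&schedule_data[smp_processor_id()].schedule_lock);
	next = pick_next_vcpu();   /* stand-in for the scheduling policy */
	context_switch(prev, next);
	/* A vcpu running for the first time never returns here -- it resumes
	 * in schedule_tail() -- so this unlock would be skipped unless
	 * schedule_tail() releases the lock itself. */
	spin_unlock_irq(&schedule_data[smp_processor_id()].schedule_lock);
}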
diff --git a/xen/arch/ia64/xen/xenirq.c b/xen/arch/ia64/xen/xenirq.c
index bf4778e01ed308f85725f5049caa630913db71e0..e66b8ac05779a6c9a3d2948d3ddbec6b42dfc2ab 100644
@@ -35,7 +35,7 @@ xen_debug_irq(ia64_vector vector, struct pt_regs *regs)
 int
 xen_do_IRQ(ia64_vector vector)
 {
-       if (vector != 0xef) {
+       if (vector != IA64_TIMER_VECTOR && vector != IA64_IPI_VECTOR) {
                extern void vcpu_pend_interrupt(void *, int);
 #if 0
                if (firsttime[vector]) {
@@ -57,22 +57,18 @@ xen_do_IRQ(ia64_vector vector)
        return(0);
 }
 
-/* From linux/kernel/softirq.c */
-#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
-# define invoke_softirq()      __do_softirq()
-#else
-# define invoke_softirq()      do_softirq()
-#endif
-
 /*
  * Exit an interrupt context. Process softirqs if needed and possible:
  */
 void irq_exit(void)
 {
        //account_system_vtime(current);
-       //sub_preempt_count(IRQ_EXIT_OFFSET);
-       if (!in_interrupt() && local_softirq_pending())
-               invoke_softirq();
+       sub_preempt_count(IRQ_EXIT_OFFSET);
+       if (!in_interrupt() && local_softirq_pending()) {
+               add_preempt_count(SOFTIRQ_OFFSET);
+               do_softirq();
+               sub_preempt_count(SOFTIRQ_OFFSET);
+       }
        //preempt_enable_no_resched();
 }
 /* end from linux/kernel/softirq.c */
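
The add/sub_preempt_count(SOFTIRQ_OFFSET) bracketing works because
in_interrupt() is now real (see the hardirq.h hunk below): it inspects the
softirq and hardirq fields of preempt_count(). The layout, as in the Linux 2.6
headers this tree borrows from:

/* preempt_count() layout behind hardirq_count()/softirq_count()/irq_count(): */
#define PREEMPT_MASK   0x000000ff   /* preemption-disable depth */
#define SOFTIRQ_MASK   0x0000ff00   /* softirq nesting depth    */
#define HARDIRQ_MASK   0x0fff0000   /* hardirq nesting depth    */

#define SOFTIRQ_OFFSET 0x00000100   /* one softirq nesting level */

#define softirq_count()  (preempt_count() & SOFTIRQ_MASK)
#define hardirq_count()  (preempt_count() & HARDIRQ_MASK)
#define irq_count()      (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
#define in_interrupt()   (irq_count())

/* Holding SOFTIRQ_OFFSET in preempt_count() while do_softirq() runs keeps
 * in_interrupt() true, so a nested irq_exit() will not re-enter softirqs. */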
diff --git a/xen/arch/ia64/xen/xenmisc.c b/xen/arch/ia64/xen/xenmisc.c
index e25a8fa1f0d7bd3e967de31b68f780e28953a7b0..e62115ebbd31cfa3b1c93e2e836cbccffee84f61 100644
@@ -280,6 +280,8 @@ void cs01foo(void) {}
 
 unsigned long context_switch_count = 0;
 
+#include <asm/vcpu.h>
+
 void context_switch(struct vcpu *prev, struct vcpu *next)
 {
 //printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
@@ -287,7 +289,8 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 //prev->domain->domain_id,(long)prev&0xffffff,next->domain->domain_id,(long)next&0xffffff);
 //if (prev->domain->domain_id == 1 && next->domain->domain_id == 0) cs10foo();
 //if (prev->domain->domain_id == 0 && next->domain->domain_id == 1) cs01foo();
-//printk("@@sw %d->%d\n",prev->domain->domain_id,next->domain->domain_id);
+printk("@@sw%d/%x %d->%d\n",smp_processor_id(), hard_smp_processor_id (),
+       prev->domain->domain_id,next->domain->domain_id);
     if(VMX_DOMAIN(prev)){
        vtm_domain_out(prev);
     }
diff --git a/xen/arch/ia64/xen/xensetup.c b/xen/arch/ia64/xen/xensetup.c
index 8b56f9817eb7e25a2d8d4ecaa3487bc6918ec88b..49d11862a568538e240b901aba9b064dacd9516d 100644
@@ -253,11 +253,11 @@ void start_kernel(void)
 printk("About to call scheduler_init()\n");
     scheduler_init();
     local_irq_disable();
+    init_IRQ ();
 printk("About to call init_xen_time()\n");
     init_xen_time(); /* initialise the time */
 printk("About to call ac_timer_init()\n");
     ac_timer_init();
-// init_xen_time(); ???
 
 #ifdef CONFIG_SMP
     if ( opt_nosmp )
@@ -276,6 +276,9 @@ printk("About to call ac_timer_init()\n");
 
     //BUG_ON(!local_irq_is_enabled());
 
+    /*  Enable IRQ to receive IPI (needed for ITC sync).  */
+    local_irq_enable();
+
 printk("num_online_cpus=%d, max_cpus=%d\n",num_online_cpus(),max_cpus);
     for_each_present_cpu ( i )
     {
@@ -287,24 +290,16 @@ printk("About to call __cpu_up(%d)\n",i);
        }
     }
 
+    local_irq_disable();
+
     printk("Brought up %ld CPUs\n", (long)num_online_cpus());
     smp_cpus_done(max_cpus);
 #endif
 
-
-       // FIXME: Should the following be swapped and moved later?
-    schedulers_start();
     do_initcalls();
 printk("About to call sort_main_extable()\n");
     sort_main_extable();
 
-    /* surrender usage of kernel registers to domain, use percpu area instead */
-    __get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE);
-    __get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA);
-    __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK);
-    __get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER);
-    __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT);
-    __get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE);
 
     /* Create initial domain 0. */
 printk("About to call do_createdomain()\n");
@@ -342,6 +337,11 @@ printk("About to call construct_dom0()\n");
                         0,
                        0) != 0)
         panic("Could not set up DOM0 guest OS\n");
+
+    /* PIN domain0 on CPU 0.  */
+    dom0->vcpu[0]->cpumap=1;
+    set_bit(_VCPUF_cpu_pinned, &dom0->vcpu[0]->vcpu_flags);
+
 #ifdef CLONE_DOMAIN0
     {
     int i;
@@ -379,9 +379,16 @@ printk("About to call init_trace_bufs()\n");
        domain_unpause_by_systemcontroller(clones[i]);
     }
 #endif
-    domain_unpause_by_systemcontroller(dom0);
     domain0_ready = 1;
+
     local_irq_enable();
+
+    printf("About to call schedulers_start dom0=%p, idle0_dom=%p\n",
+          dom0, &idle0_domain);
+    schedulers_start();
+
+    domain_unpause_by_systemcontroller(dom0);
+
 printk("About to call startup_cpu_idle_loop()\n");
     startup_cpu_idle_loop();
 }
diff --git a/xen/arch/ia64/xen/xentime.c b/xen/arch/ia64/xen/xentime.c
index 30f3c111ced8667d132e1fbe63f4ba78f07fd481..75a454b0ed576e203f6ce229ba84dc4c3f4c2e29 100644
@@ -103,10 +103,10 @@ xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 #ifdef HEARTBEAT_FREQ
        static long count = 0;
        if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
-               printf("Heartbeat... iip=%p,psr.i=%d,pend=%d\n",
-                       regs->cr_iip,
+               printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
+                       regs->cr_iip /*,
                        VCPU(current,interrupt_delivery_enabled),
-                       VCPU(current,pending_interruption));
+                       VCPU(current,pending_interruption) */);
                count = 0;
        }
 #endif
diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h
index 1d3ba8dabdfab5f46c05c474d889ce99a7a3e81e..10a9b387398bbd4c3ff146654a7270fe54e52c43 100644
@@ -28,8 +28,8 @@
 
 #ifdef CONFIG_XEN_SMP
 #define CONFIG_SMP 1
-#define NR_CPUS 2
-#define CONFIG_NR_CPUS 2
+#define NR_CPUS 8
+#define CONFIG_NR_CPUS 8
 #else
 #undef CONFIG_SMP
 #define NR_CPUS 1
@@ -123,8 +123,7 @@ extern char _end[]; /* standard ELF symbol */
 #ifdef CONFIG_SMP
 #warning "Lots of things to fix to enable CONFIG_SMP!"
 #endif
-// FIXME SMP
-#define        get_cpu()       0
+#define        get_cpu()       smp_processor_id()
 #define put_cpu()      do {} while(0)
 
 // needed for common/dom0_ops.c until hyperthreading is supported
@@ -140,6 +139,7 @@ struct page;
 // function calls; see decl in xen/include/xen/sched.h
 #undef free_task_struct
 #undef alloc_task_struct
+#define get_thread_info(v) alloc_thread_info(v)
 
 // initial task has a different name in Xen
 //#define      idle0_task      init_task
@@ -299,7 +299,11 @@ extern int ht_per_core;
 #endif /* __XEN_IA64_CONFIG_H__ */
 
 // needed for include/xen/smp.h
+#ifdef CONFIG_SMP
+#define __smp_processor_id()   current_thread_info()->cpu
+#else
 #define __smp_processor_id()   0
+#endif
 
 
 // FOLLOWING ADDED FOR XEN POST-NGIO and/or LINUX 2.6.7
diff --git a/xen/include/asm-ia64/linux-xen/asm/spinlock.h b/xen/include/asm-ia64/linux-xen/asm/spinlock.h
index c2d32e7a4868ca6664ace8df8bc63ea1755439db..1c2034c80d9d09054ab7a31d6d61f81013ac576c 100644
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
+#define DEBUG_SPINLOCK
+
 typedef struct {
        volatile unsigned int lock;
 #ifdef CONFIG_PREEMPT
        unsigned int break_lock;
 #endif
+#ifdef DEBUG_SPINLOCK
+       void *locker;
+#endif
 #ifdef XEN
        unsigned char recurse_cpu;
        unsigned char recurse_cnt;
@@ -96,6 +101,10 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
                      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
 # endif /* CONFIG_MCKINLEY */
 #endif
+
+#ifdef DEBUG_SPINLOCK
+       asm volatile ("mov %0=ip" : "=r" (lock->locker));
+#endif
 }
 #define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
 #else /* !ASM_SUPPORTED */
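
The new locker field records the instruction pointer at the acquisition site
("mov %0=ip" reads the current instruction pointer on ia64; since
_raw_spin_lock_flags() is inlined, this points into the caller). A hypothetical
diagnostic using it (dump_lock() is an illustration, not part of the patch):

/* On a suspected deadlock, report where a lock was last taken; resolve
 * the recorded ip with a symbol table or objdump to name the culprit. */
static void dump_lock(spinlock_t *lock)
{
	printk("spinlock %p: lock=%u, last acquired near ip=%p\n",
	       lock, lock->lock, lock->locker);
}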
diff --git a/xen/include/asm-ia64/linux-xen/linux/hardirq.h b/xen/include/asm-ia64/linux-xen/linux/hardirq.h
index 24314917263745df4c6debddd350accefcd90706..b076644e248dff4c7bf0f5c3ffcbd9b21705ae6d 100644
  */
 #define in_irq()               (hardirq_count())
 #define in_softirq()           (softirq_count())
-#ifdef XEN
-#define in_interrupt()         0               // FIXME SMP LATER
-#else
 #define in_interrupt()         (irq_count())
-#endif
 
 #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
 # define in_atomic()   ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
diff --git a/xen/include/asm-ia64/linux-xen/linux/interrupt.h b/xen/include/asm-ia64/linux-xen/linux/interrupt.h
index caea47d6410554f476243477ae9a42b40b391a05..b88e9457f1f53b58cc75ad81df789fdd329b5bc6 100644
@@ -88,6 +88,7 @@ static inline void __deprecated save_and_cli(unsigned long *x)
 #define save_and_cli(x)        save_and_cli(&x)
 #endif /* CONFIG_SMP */
 
+#ifndef XEN
 /* SoftIRQ primitives.  */
 #define local_bh_disable() \
                do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
@@ -95,6 +96,7 @@ static inline void __deprecated save_and_cli(unsigned long *x)
                do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
 
 extern void local_bh_enable(void);
+#endif
 
 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
    frequency threaded job scheduling. For almost all the purposes